library(ROBITaxonomy)
##
## Attachement du package : 'ROBITaxonomy'
## L'objet suivant est masqué depuis 'package:stats':
##
## family
library(ROBITools)
## Warning: remplacement de l'importation précédente 'dbplyr::ident' par
## 'dplyr::ident' lors du chargement de 'ROBITools'
## Warning: remplacement de l'importation précédente 'dbplyr::sql' par 'dplyr::sql'
## lors du chargement de 'ROBITools'
## Warning: remplacement de l'importation précédente 'dplyr::union' par
## 'igraph::union' lors du chargement de 'ROBITools'
## Warning: remplacement de l'importation précédente 'dplyr::as_data_frame' par
## 'igraph::as_data_frame' lors du chargement de 'ROBITools'
## Warning: remplacement de l'importation précédente 'dplyr::groups' par
## 'igraph::groups' lors du chargement de 'ROBITools'
## Warning: remplacement de l'importation précédente 'ROBITaxonomy::path' par
## 'igraph::path' lors du chargement de 'ROBITools'
## Warning: remplacement de l'importation précédente 'ROBITaxonomy::parent' par
## 'igraph::parent' lors du chargement de 'ROBITools'
## ROBITools package
library(vegan)
## Le chargement a nécessité le package : permute
## Le chargement a nécessité le package : lattice
## This is vegan 2.5-7
##
## Attachement du package : 'vegan'
## L'objet suivant est masqué depuis 'package:ROBITools':
##
## rarefy
library(ade4)
library(ggplot2)
library(tidyverse)
## ── Attaching packages ─────────────────────────────────────── tidyverse 1.3.1 ──
## ✓ tibble 3.1.2 ✓ dplyr 1.0.7
## ✓ tidyr 1.1.3 ✓ stringr 1.4.0
## ✓ readr 1.4.0 ✓ forcats 0.5.1
## ✓ purrr 0.3.4
## ── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
## x dplyr::filter() masks stats::filter()
## x dplyr::lag() masks stats::lag()
library(permute)
library(lattice)
# Colour-blind-friendly palettes (Okabe-Ito colours) used by the plots below.
# The palette with grey:
cbPalette <- c("#999999", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
# The palette with black:
cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
## --------------------------------------------------------------------------
## The read contingency table (the code below comes from an ```r chunk of
## the knitted document; the chunk fence was left in by mistake)
# Load the three tables making up the metabarcoding data set -- the read
# contingency table, the sample descriptions and the MOTU descriptions --
# and bundle them into a single metabarcoding.data object.
read_counts <- as.matrix(read.csv("Data/Faeces/FE.Spermatophyta.samples.reads.csv",
                                  header = TRUE,
                                  row.names = 1))
sample_desc <- read.csv("Data/Faeces/FE.Spermatophyta.samples.samples.csv",
                        header = TRUE,
                        row.names = 1)
motu_desc <- read.csv("Data/Faeces/FE.Spermatophyta.samples.motus.csv",
                      header = TRUE,
                      row.names = 1)
Sper01 <- metabarcoding.data(reads = read_counts,
                             samples = sample_desc,
                             motus = motu_desc)

# Rank MOTUs by mean read frequency and reorder the object so the most
# abundant MOTUs come first.
mean_freq <- colMeans(reads(Sper01))
Sper01@motus$mean_ref_freq <- mean_freq
Sper01 <- Sper01[, order(mean_freq, decreasing = TRUE)]

# Inspect the 20 most abundant MOTUs and the taxonomic ranks present.
motus(Sper01)[1:20, c("scientific_name", "mean_ref_freq", "rank")]
table(motus(Sper01)$rank)
##
## family genus no rank order species subfamily subgenus subtribe
## 9 24 3 2 4 7 2 2
## tribe
## 13
# Sampling metadata: animal id, sample number, date/time, and the time
# elapsed since birch was fed (times_from_birch; plots below divide it by
# 24, so presumably hours -- TODO confirm units against the sampling log).
samples.metadata = read_csv("Data/Faeces/sampling_dates.csv")
samples.metadata = inner_join(samples.metadata, Sper01@samples, by = "sample_id")
# Tibbles do not support row names: convert to a plain data.frame before
# indexing by sample id. This avoids the "Setting row names on a tibble is
# deprecated" warning triggered by the original code.
samples.metadata = as.data.frame(samples.metadata)
rownames(samples.metadata) = as.character(samples.metadata$sample_id)
# Re-order the metadata rows to match the sample order of Sper01.
samples.metadata = samples.metadata[rownames(Sper01), ]
Sper01@samples = samples.metadata
# BUG FIX: the original wrote rownames(as.character(...)), and rownames()
# of a character vector is always NULL; the intent was to use the sample
# ids themselves as row names.
rownames(Sper01@samples) = as.character(Sper01@samples$sample_id)
# Per-animal offsets of times_from_birch (each animal was handled at a
# different clock time -- NOTE(review): confirm the offset values).
Sper01@samples[Sper01@samples$animal_id == "X", "times_from_birch"] =
  Sper01@samples[Sper01@samples$animal_id == "X", "times_from_birch"] + 6
Sper01@samples[Sper01@samples$animal_id == "Y", "times_from_birch"] =
  Sper01@samples[Sper01@samples$animal_id == "Y", "times_from_birch"] + 3
Sper01@samples[Sper01@samples$animal_id == "Z", "times_from_birch"] =
  Sper01@samples[Sper01@samples$animal_id == "Z", "times_from_birch"] + 4
# View() is interactive-only; guard it so the script also runs when knitted
# or sourced non-interactively.
if (interactive()) View(Sper01@samples)
# Alternative normalisations of the read counts.
Sper01$hellinger = decostand(reads(Sper01), method = "hellinger")
Sper01$relfreq = decostand(reads(Sper01), method = "total")
Sper01$presence = reads(Sper01) > 0
# Human-readable animal labels (day/month), kept in feeding order.
Animal_id = rep("Unknown", nrow(samples(Sper01)))
Animal_id[which(samples(Sper01)$animal_id == "X")] = "9/10"
Animal_id[which(samples(Sper01)$animal_id == "Y")] = "10/10"
Animal_id[which(samples(Sper01)$animal_id == "Z")] = "12/10"
Sper01@samples$Animal_id = factor(Animal_id, levels = c("9/10", "10/10", "12/10"))
Animal = Sper01@samples$Animal_id
# BUG FIX: the original line jammed two expressions together and used
# typographic quotes, which is a syntax error; the intent was to display
# the animal/replicate columns of the sample table.
samples(Sper01)[1:210, c("Animal_id", "replicate", "Sample_number")]
list(Sper01@samples)
## [[1]]
## sample_id Animal_ID Sample_number Date Sample_time times_from_birch
## 1 X_10 X 10 26/01/2018 11:05:00 62
## 2 X_11 X 11 26/01/2018 16:34:00 67
## 3 X_12 X 12 26/01/2018 22:00:00 73
## 4 X_14 X 14 27/01/2018 09:00:00 84
## 5 X_15 X 15 27/01/2018 16:06:00 91
## 6 X_16 X 16 27/01/2018 23:59:00 99
## 7 X_17 X 17 28/01/2018 06:00:00 105
## 8 X_18 X 18 28/01/2018 11:32:00 110
## 9 X_19 X 19 28/01/2018 16:37:00 115
## 10 X_2 X 2 24/01/2018 08:16:00 11
## 11 X_20 X 20 28/01/2018 00:00:00 99
## 12 X_21 X 21 29/01/2018 06:00:00 129
## 13 X_22 X 22 29/01/2018 10:38:00 133
## 14 X_23 X 23 29/01/2018 16:25:00 139
## 15 X_24 X 24 29/01/2018 23:57:00 147
## 16 X_25 X 25 30/01/2018 03:00:00 150
## 17 X_26 X 26 30/01/2018 11:58:00 159
## 18 X_27 X 27 30/01/2018 14:21:00 161
## 19 X_28 X 28 30/01/2018 23:55:00 171
## 20 X_29 X 29 31/01/2018 05:56:00 177
## 21 X_3 X 3 24/01/2018 17:00:00 20
## 22 X_30 X 30 31/01/2018 11:55:00 183
## 23 X_31 X 31 31/01/2018 16:07:00 187
## 24 X_33 X 33 01/02/2018 03:01:00 198
## 25 X_34 X 34 01/02/2018 09:21:00 204
## 26 X_35 X 35 01/02/2018 16:28:00 211
## 27 X_36 X 36 01/02/2018 23:57:00 219
## 28 X_37 X 37 02/02/2018 03:01:00 222
## 29 X_38 X 38 02/02/2018 08:42:00 228
## 30 X_39 X 39 02/02/2018 16:24:00 235
## 31 X_4 X 4 24/01/2018 22:27:00 25
## 32 X_41 X 41 03/02/2018 11:34:00 254
## 33 X_42 X 42 03/02/2018 20:02:00 263
## 34 X_44 X 44 04/02/2018 22:14:00 289
## 35 X_51 X 51 08/02/2018 09:30:00 372
## 36 X_53 X 53 09/02/2018 11:10:00 398
## 37 X_54 X 54 09/02/2018 19:00:00 406
## 38 X_56 X 56 10/02/2018 19:06:00 430
## 39 X_57 X 57 11/02/2018 18:58:00 454
## 40 X_59 X 59 12/02/2018 18:58:00 478
## 41 X_60 X 60 13/02/2018 11:55:00 495
## 42 X_63 X 63 14/02/2018 12:50:00 520
## 43 X_64 X 64 15/02/2018 10:23:00 541
## 44 X_65 X 65 16/02/2018 09:51:00 565
## 45 X_66 X 66 17/02/2018 09:53:00 589
## 46 X_68 X 68 19/02/2018 12:51:00 640
## 47 X_70 X 70 21/02/2018 13:22:00 688
## 48 X_74 X 74 22/01/2018 19:33:00 -25
## 49 X_75 X 75 02/03/2018 14:55:00 906
## 50 X_76 X 76 06/03/2018 15:20:00 1002
## 51 X_77 X 77 09/03/2018 15:08:00 1074
## 52 X_78 X 78 16/03/2018 14:50:00 1242
## 53 X_79 X 79 20/03/2018 14:38:00 1337
## 54 X_80 X 80 23/03/2018 14:22:00 1409
## 55 X_9 X 9 26/01/2018 03:25:00 54
## 56 Y_1 Y 1 22/01/2018 11:26:00 -34
## 57 Y_11 Y 11 26/01/2018 16:38:00 67
## 58 Y_13 Y 13 27/01/2018 06:00:00 81
## 59 Y_14 Y 14 27/01/2018 08:50:00 84
## 60 Y_18 Y 18 28/01/2018 11:54:00 111
## 61 Y_21 Y 21 29/01/2018 06:00:00 129
## 62 Y_23 Y 23 29/01/2018 14:14:00 137
## 63 Y_24 Y 24 29/01/2018 23:59:00 147
## 64 Y_25 Y 25 30/01/2018 03:22:00 150
## 65 Y_26 Y 26 30/01/2018 07:44:00 155
## 66 Y_28 Y 28 30/01/2018 23:55:00 171
## 67 Y_29 Y 29 31/01/2018 02:45:00 174
## 68 Y_3 Y 3 24/01/2018 13:30:00 16
## 69 Y_31 Y 31 31/01/2018 16:12:00 187
## 70 Y_32 Y 32 31/01/2018 23:56:00 195
## 71 Y_33 Y 33 01/02/2018 03:00:00 198
## 72 Y_34 Y 34 01/02/2018 08:32:00 203
## 73 Y_36 Y 36 01/02/2018 22:00:00 217
## 74 Y_38 Y 38 02/02/2018 08:45:00 228
## 75 Y_39 Y 39 02/02/2018 16:29:00 235
## 76 Y_4 Y 4 24/01/2018 22:48:00 26
## 77 Y_40 Y 40 03/02/2018 10:35:00 253
## 78 Y_41 Y 41 03/02/2018 16:35:00 259
## 79 Y_42 Y 42 04/02/2018 10:14:00 277
## 80 Y_43 Y 43 04/02/2018 22:12:00 289
## 81 Y_45 Y 45 05/02/2018 19:25:00 310
## 82 Y_5 Y 5 25/01/2018 03:19:00 30
## 83 Y_50 Y 50 08/02/2018 09:13:00 372
## 84 Y_51 Y 51 08/02/2018 19:19:00 382
## 85 Y_53 Y 53 09/02/2018 19:26:00 406
## 86 Y_57 Y 57 12/02/2018 07:59:00 467
## 87 Y_58 Y 58 12/02/2018 16:00:00 475
## 88 Y_59 Y 59 13/02/2018 09:39:00 492
## 89 Y_6 Y 6 25/01/2018 08:10:00 35
## 90 Y_61 Y 61 14/02/2018 08:01:00 515
## 91 Y_69 Y 69 21/02/2018 12:59:00 688
## 92 Y_7 Y 7 25/01/2018 13:01:00 40
## 93 Y_70 Y 70 22/02/2018 13:26:00 712
## 94 Y_71 Y 71 31/01/2018 14:31:00 185
## 95 Y_72 Y 72 02/03/2018 15:25:00 906
## 96 Y_74 Y 74 20/03/2018 14:45:00 1338
## 97 Y_8 Y 8 25/01/2018 21:21:00 48
## 98 Y_9 Y 9 26/01/2018 03:21:00 54
## 99 Z_1 Z 1 22/01/2018 14:00:00 -31
## 100 Z_10 Z 10 26/01/2018 05:59:00 57
## 101 Z_11 Z 11 26/01/2018 11:07:00 62
## 102 Z_12 Z 12 26/01/2018 13:24:00 64
## 103 Z_13 Z 13 26/01/2018 22:00:00 73
## 104 Z_14 Z 14 27/01/2018 06:00:00 81
## 105 Z_15 Z 15 27/01/2018 08:50:00 84
## 106 Z_16 Z 16 27/01/2018 14:30:00 89
## 107 Z_17 Z 17 27/01/2018 23:59:00 99
## 108 Z_18 Z 18 28/01/2018 06:00:00 105
## 109 Z_19 Z 19 28/01/2018 11:55:00 111
## 110 Z_21 Z 21 28/01/2018 23:59:00 123
## 111 Z_22 Z 22 29/01/2018 06:00:00 129
## 112 Z_23 Z 23 29/01/2018 10:42:00 133
## 113 Z_24 Z 24 29/01/2018 14:07:00 137
## 114 Z_25 Z 25 29/01/2018 23:50:00 147
## 115 Z_27 Z 27 30/01/2018 11:41:00 158
## 116 Z_28 Z 28 30/01/2018 14:11:00 161
## 117 Z_3 Z 3 24/01/2018 11:10:00 14
## 118 Z_31 Z 31 31/01/2018 11:07:00 182
## 119 Z_32 Z 32 31/01/2018 14:23:00 185
## 120 Z_33 Z 33 31/01/2018 21:57:00 193
## 121 Z_34 Z 34 01/02/2018 02:54:00 198
## 122 Z_35 Z 35 01/02/2018 09:24:00 204
## 123 Z_36 Z 36 01/02/2018 16:24:00 211
## 124 Z_37 Z 37 01/02/2018 22:21:00 217
## 125 Z_38 Z 38 02/02/2018 02:56:00 222
## 126 Z_4 Z 4 24/01/2018 13:30:00 16
## 127 Z_40 Z 40 02/02/2018 16:20:00 235
## 128 Z_42 Z 42 03/02/2018 10:24:00 253
## 129 Z_43 Z 43 03/02/2018 17:40:00 260
## 130 Z_44 Z 44 04/02/2018 10:14:00 277
## 131 Z_46 Z 46 05/02/2018 11:14:00 302
## 132 Z_48 Z 48 06/02/2018 10:40:00 325
## 133 Z_49 Z 49 06/02/2018 19:26:00 334
## 134 Z_5 Z 5 24/01/2018 22:28:00 25
## 135 Z_51 Z 51 07/02/2018 16:19:00 355
## 136 Z_52 Z 52 08/02/2018 08:23:00 371
## 137 Z_54 Z 54 09/02/2018 10:30:00 397
## 138 Z_55 Z 55 09/02/2018 19:28:00 406
## 139 Z_56 Z 56 10/02/2018 10:17:00 421
## 140 Z_59 Z 59 12/02/2018 09:17:00 468
## 141 Z_6 Z 6 25/01/2018 03:08:00 30
## 142 Z_60 Z 60 12/02/2018 16:00:00 475
## 143 Z_62 Z 62 13/02/2018 19:07:00 502
## 144 Z_63 Z 63 14/02/2018 08:41:00 515
## 145 Z_65 Z 65 15/02/2018 09:19:00 540
## 146 Z_66 Z 66 16/02/2018 11:21:00 566
## 147 Z_67 Z 67 17/02/2018 10:28:00 589
## 148 Z_68 Z 68 18/02/2018 09:56:00 613
## 149 Z_69 Z 69 19/02/2018 12:52:00 640
## 150 Z_7 Z 7 25/01/2018 11:00:00 38
## 151 Z_70 Z 70 20/02/2018 09:14:00 660
## 152 Z_71 Z 71 21/02/2018 12:29:00 687
## 153 Z_72 Z 72 22/02/2018 13:03:00 712
## 154 Z_73 Z 73 10/02/2018 09:11:00 420
## 155 Z_74 Z 74 25/01/2018 23:13:00 50
## 156 Z_75 Z 75 02/03/2018 14:50:00 906
## 157 Z_76 Z 76 02/03/2018 14:57:00 906
## 158 Z_77 Z 77 09/03/2018 14:47:00 1074
## 159 Z_78 Z 78 13/03/2018 15:50:00 1171
## 160 Z_79 Z 79 20/03/2018 14:41:00 1337
## 161 Z_8 Z 8 25/01/2018 15:50:00 43
## 162 Z_80 Z 80 23/03/2018 14:10:00 1409
## 163 X_1 X 1 24/01/2018 03:12:00 6
## 164 X_32 X 32 31/01/2018 23:58:00 195
## 165 X_40 X 40 02/02/2018 19:50:00 239
## 166 X_43 X 43 04/02/2018 10:31:00 277
## 167 X_45 X 45 05/02/2018 11:16:00 302
## 168 X_46 X 46 05/02/2018 19:23:00 310
## 169 X_47 X 47 06/02/2018 08:42:00 324
## 170 X_48 X 48 06/02/2018 19:28:00 334
## 171 X_49 X 49 07/02/2018 09:28:00 348
## 172 X_52 X 52 08/02/2018 19:15:00 382
## 173 X_55 X 55 10/02/2018 10:13:00 421
## 174 X_58 X 58 12/02/2018 09:17:00 468
## 175 X_5 X 5 25/01/2018 03:15:00 30
## 176 X_62 X 62 14/02/2018 09:24:00 516
## 177 X_67 X 67 18/02/2018 09:56:00 613
## 178 X_69 X 69 20/02/2018 08:16:00 659
## 179 X_6 X 6 25/01/2018 11:00:00 38
## 180 X_71 X 71 22/02/2018 12:33:00 711
## 181 X_72 X 72 22/01/2018 14:15:00 -31
## 182 X_73 X 73 22/01/2018 16:00:00 -29
## 183 X_7 X 7 25/01/2018 17:10:00 44
## 184 X_8 X 8 25/01/2018 23:17:00 50
## 185 Y_10 Y 10 26/01/2018 09:00:00 60
## 186 Y_12 Y 12 26/01/2018 22:00:00 73
## 187 Y_15 Y 15 27/01/2018 14:35:00 89
## 188 Y_16 Y 16 27/01/2018 22:00:00 97
## 189 Y_17 Y 17 28/01/2018 06:00:00 105
## 190 Y_19 Y 19 28/01/2018 16:43:00 116
## 191 Y_20 Y 20 28/01/2018 22:00:00 121
## 192 Y_22 Y 22 29/01/2018 09:13:00 132
## 193 Y_27 Y 27 30/01/2018 14:30:00 161
## 194 Y_30 Y 30 31/01/2018 11:58:00 183
## 195 Y_35 Y 35 01/02/2018 13:43:00 209
## 196 Y_37 Y 37 02/02/2018 06:01:00 225
## 197 Y_54 Y 54 10/02/2018 09:52:00 421
## 198 Y_55 Y 55 10/02/2018 19:15:00 430
## 199 Y_60 Y 60 13/02/2018 19:01:00 502
## 200 Y_62 Y 62 14/02/2018 16:51:00 524
## 201 Y_63 Y 63 15/02/2018 08:57:00 540
## 202 Y_64 Y 64 16/02/2018 11:48:00 567
## 203 Y_65 Y 65 17/02/2018 10:48:00 590
## 204 Y_66 Y 66 18/02/2018 12:50:00 616
## 205 Y_67 Y 67 19/02/2018 12:52:00 640
## 206 Y_68 Y 68 20/02/2018 08:33:00 659
## 207 Y_73 Y 73 09/03/2018 14:42:00 1074
## 208 Z_26 Z 26 30/01/2018 03:00:00 150
## 209 Z_29 Z 29 30/01/2018 23:55:00 171
## 210 Z_2 Z 2 24/01/2018 01:17:00 4
## 211 Z_41 Z 41 02/02/2018 19:46:00 238
## 212 Z_47 Z 47 05/02/2018 16:02:00 307
## 213 Z_50 Z 50 07/02/2018 09:43:00 348
## 214 Z_57 Z 57 10/02/2018 19:06:00 430
## 215 Z_58 Z 58 11/02/2018 18:51:00 454
## 216 Z_9 Z 9 25/01/2018 21:13:00 48
## name replicate animal_id Animal_id
## 1 <NA> NA X 9/10
## 2 <NA> NA X 9/10
## 3 <NA> NA X 9/10
## 4 <NA> NA X 9/10
## 5 <NA> NA X 9/10
## 6 <NA> NA X 9/10
## 7 <NA> NA X 9/10
## 8 <NA> NA X 9/10
## 9 <NA> NA X 9/10
## 10 <NA> NA X 9/10
## 11 <NA> NA X 9/10
## 12 <NA> NA X 9/10
## 13 <NA> NA X 9/10
## 14 <NA> NA X 9/10
## 15 <NA> NA X 9/10
## 16 <NA> NA X 9/10
## 17 <NA> NA X 9/10
## 18 <NA> NA X 9/10
## 19 <NA> NA X 9/10
## 20 <NA> NA X 9/10
## 21 <NA> NA X 9/10
## 22 <NA> NA X 9/10
## 23 <NA> NA X 9/10
## 24 <NA> NA X 9/10
## 25 <NA> NA X 9/10
## 26 <NA> NA X 9/10
## 27 <NA> NA X 9/10
## 28 <NA> NA X 9/10
## 29 <NA> NA X 9/10
## 30 <NA> NA X 9/10
## 31 <NA> NA X 9/10
## 32 <NA> NA X 9/10
## 33 <NA> NA X 9/10
## 34 <NA> NA X 9/10
## 35 <NA> NA X 9/10
## 36 <NA> NA X 9/10
## 37 <NA> NA X 9/10
## 38 <NA> NA X 9/10
## 39 <NA> NA X 9/10
## 40 <NA> NA X 9/10
## 41 <NA> NA X 9/10
## 42 <NA> NA X 9/10
## 43 <NA> NA X 9/10
## 44 <NA> NA X 9/10
## 45 <NA> NA X 9/10
## 46 <NA> NA X 9/10
## 47 <NA> NA X 9/10
## 48 <NA> NA X 9/10
## 49 <NA> NA X 9/10
## 50 <NA> NA X 9/10
## 51 <NA> NA X 9/10
## 52 <NA> NA X 9/10
## 53 <NA> NA X 9/10
## 54 <NA> NA X 9/10
## 55 <NA> NA X 9/10
## 56 <NA> NA Y 10/10
## 57 <NA> NA Y 10/10
## 58 <NA> NA Y 10/10
## 59 <NA> NA Y 10/10
## 60 <NA> NA Y 10/10
## 61 <NA> NA Y 10/10
## 62 <NA> NA Y 10/10
## 63 <NA> NA Y 10/10
## 64 <NA> NA Y 10/10
## 65 <NA> NA Y 10/10
## 66 <NA> NA Y 10/10
## 67 <NA> NA Y 10/10
## 68 <NA> NA Y 10/10
## 69 <NA> NA Y 10/10
## 70 <NA> NA Y 10/10
## 71 <NA> NA Y 10/10
## 72 <NA> NA Y 10/10
## 73 <NA> NA Y 10/10
## 74 <NA> NA Y 10/10
## 75 <NA> NA Y 10/10
## 76 <NA> NA Y 10/10
## 77 <NA> NA Y 10/10
## 78 <NA> NA Y 10/10
## 79 <NA> NA Y 10/10
## 80 <NA> NA Y 10/10
## 81 <NA> NA Y 10/10
## 82 <NA> NA Y 10/10
## 83 <NA> NA Y 10/10
## 84 <NA> NA Y 10/10
## 85 <NA> NA Y 10/10
## 86 <NA> NA Y 10/10
## 87 <NA> NA Y 10/10
## 88 <NA> NA Y 10/10
## 89 <NA> NA Y 10/10
## 90 <NA> NA Y 10/10
## 91 <NA> NA Y 10/10
## 92 <NA> NA Y 10/10
## 93 <NA> NA Y 10/10
## 94 <NA> NA Y 10/10
## 95 <NA> NA Y 10/10
## 96 <NA> NA Y 10/10
## 97 <NA> NA Y 10/10
## 98 <NA> NA Y 10/10
## 99 <NA> NA Z 12/10
## 100 <NA> NA Z 12/10
## 101 <NA> NA Z 12/10
## 102 <NA> NA Z 12/10
## 103 <NA> NA Z 12/10
## 104 <NA> NA Z 12/10
## 105 <NA> NA Z 12/10
## 106 <NA> NA Z 12/10
## 107 <NA> NA Z 12/10
## 108 <NA> NA Z 12/10
## 109 <NA> NA Z 12/10
## 110 <NA> NA Z 12/10
## 111 <NA> NA Z 12/10
## 112 <NA> NA Z 12/10
## 113 <NA> NA Z 12/10
## 114 <NA> NA Z 12/10
## 115 <NA> NA Z 12/10
## 116 <NA> NA Z 12/10
## 117 <NA> NA Z 12/10
## 118 <NA> NA Z 12/10
## 119 <NA> NA Z 12/10
## 120 <NA> NA Z 12/10
## 121 <NA> NA Z 12/10
## 122 <NA> NA Z 12/10
## 123 <NA> NA Z 12/10
## 124 <NA> NA Z 12/10
## 125 <NA> NA Z 12/10
## 126 <NA> NA Z 12/10
## 127 <NA> NA Z 12/10
## 128 <NA> NA Z 12/10
## 129 <NA> NA Z 12/10
## 130 <NA> NA Z 12/10
## 131 <NA> NA Z 12/10
## 132 <NA> NA Z 12/10
## 133 <NA> NA Z 12/10
## 134 <NA> NA Z 12/10
## 135 <NA> NA Z 12/10
## 136 <NA> NA Z 12/10
## 137 <NA> NA Z 12/10
## 138 <NA> NA Z 12/10
## 139 <NA> NA Z 12/10
## 140 <NA> NA Z 12/10
## 141 <NA> NA Z 12/10
## 142 <NA> NA Z 12/10
## 143 <NA> NA Z 12/10
## 144 <NA> NA Z 12/10
## 145 <NA> NA Z 12/10
## 146 <NA> NA Z 12/10
## 147 <NA> NA Z 12/10
## 148 <NA> NA Z 12/10
## 149 <NA> NA Z 12/10
## 150 <NA> NA Z 12/10
## 151 <NA> NA Z 12/10
## 152 <NA> NA Z 12/10
## 153 <NA> NA Z 12/10
## 154 <NA> NA Z 12/10
## 155 <NA> NA Z 12/10
## 156 <NA> NA Z 12/10
## 157 <NA> NA Z 12/10
## 158 <NA> NA Z 12/10
## 159 <NA> NA Z 12/10
## 160 <NA> NA Z 12/10
## 161 <NA> NA Z 12/10
## 162 <NA> NA Z 12/10
## 163 X_1_R1 1 X 9/10
## 164 X_32_R1 1 X 9/10
## 165 X_40_R1 1 X 9/10
## 166 X_43_R1 1 X 9/10
## 167 X_45_R1 1 X 9/10
## 168 X_46_R1 1 X 9/10
## 169 X_47_R1 1 X 9/10
## 170 X_48_R1 1 X 9/10
## 171 X_49_R1 1 X 9/10
## 172 X_52_R1 1 X 9/10
## 173 X_55_R1 1 X 9/10
## 174 X_58_R1 1 X 9/10
## 175 X_5_R1 1 X 9/10
## 176 X_62_R1 1 X 9/10
## 177 X_67_R1 1 X 9/10
## 178 X_69_R1 1 X 9/10
## 179 X_6_R1 1 X 9/10
## 180 X_71_R1 1 X 9/10
## 181 X_72_R1 1 X 9/10
## 182 X_73_R1 1 X 9/10
## 183 X_7_R1 1 X 9/10
## 184 X_8_R1 1 X 9/10
## 185 Y_10_R1 1 Y 10/10
## 186 Y_12_R1 1 Y 10/10
## 187 Y_15_R1 1 Y 10/10
## 188 Y_16_R1 1 Y 10/10
## 189 Y_17_R1 1 Y 10/10
## 190 Y_19_R1 1 Y 10/10
## 191 Y_20_R1 1 Y 10/10
## 192 Y_22_R1 1 Y 10/10
## 193 Y_27_R1 1 Y 10/10
## 194 Y_30_R1 1 Y 10/10
## 195 Y_35_R1 1 Y 10/10
## 196 Y_37_R1 1 Y 10/10
## 197 Y_54_R1 1 Y 10/10
## 198 Y_55_R1 1 Y 10/10
## 199 Y_60_R1 1 Y 10/10
## 200 Y_62_R1 1 Y 10/10
## 201 Y_63_R1 1 Y 10/10
## 202 Y_64_R1 1 Y 10/10
## 203 Y_65_R1 1 Y 10/10
## 204 Y_66_R1 1 Y 10/10
## 205 Y_67_R1 1 Y 10/10
## 206 Y_68_R1 1 Y 10/10
## 207 Y_73_R1 1 Y 10/10
## 208 Z_26_R1 1 Z 12/10
## 209 Z_29_R1 1 Z 12/10
## 210 Z_2_R1 1 Z 12/10
## 211 Z_41_R1 1 Z 12/10
## 212 Z_47_R1 1 Z 12/10
## 213 Z_50_R1 1 Z 12/10
## 214 Z_57_R1 1 Z 12/10
## 215 Z_58_R1 1 Z 12/10
## 216 Z_9_R1 1 Z 12/10
# Load the NCBI taxonomy dump used to resolve the MOTUs' taxids.
taxo = read.taxonomy("Data/ncbi20210212")
# NOTE(review): despite the variable name, the pattern matches the order
# Fagales, not the genus Betula; downstream the selected MOTUs are labelled
# "Betulaceae DNA" -- confirm that matching at the Fagales level is intended.
Betula.taxid = ecofind(taxo,"^Fagales$")
# Flag each MOTU whose taxid falls inside the clade found above.
is.a.Betula = is.subcladeof(taxonomy = taxo,Sper01@motus$taxid,Betula.taxid)
# Taxids that could not be resolved are treated as "not in the clade".
is.a.Betula[is.na(is.a.Betula)]=FALSE
sum(is.a.Betula,na.rm = TRUE)
## [1] 5
# Inspect the matching MOTUs and extract their read columns.
Sper01@motus[is.a.Betula,c("scientific_name","mean_ref_freq")]
Betula = Sper01@reads[,is.a.Betula]
# Proportion of Betulaceae reads through time, grey-scale version for print.
# One point per faecal sample; the y value sums the five Betulaceae MOTUs.
# NOTE(review): confirm what the vertical line at day 26 marks.
plot2 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018)) +
  geom_point(aes(shape = Animal, color = Animal)) +
  scale_colour_grey(guide = "legend") +
  # The original also called scale_x_continuous(breaks = 5), which the
  # following xlim() immediately discarded (ggplot warned "Scale for 'x'
  # is already present"); only the limits are kept.
  xlim(-2, 30) +
  scale_y_log10() +
  geom_vline(xintercept = 26, colour = "black") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot2
ggsave("GH.Time_for_publication.png", plot = plot2, width = 25, height = 16, units = c("cm"))
# (A stray fragment "panel.grid.major = element_blank()," that floated here
# in the original -- a syntax error not attached to any theme() call -- was
# removed.)
# Proportion of Betulaceae reads through time, coloured per animal.
plot3 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018,
                   color = Animal)) +
  geom_point(aes(shape = Animal, color = Animal)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  scale_y_log10() +
  geom_vline(xintercept = 26, colour = "black") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot3
ggsave("GH.Time_for_publication_colors.png", plot = plot3, width = 25, height = 16, units = c("cm"))
# Loess trend of the Betulaceae proportion per animal (no points, no CI).
plot4 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018,
                   color = Animal)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  geom_vline(xintercept = 26, colour = "black") +
  geom_smooth(method = "loess", se = FALSE) +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot4
# Same as plot4 but with the loess confidence band (se = TRUE).
plot5 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018,
                   color = Animal)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  geom_vline(xintercept = 26, colour = "black") +
  geom_smooth(method = "loess", se = TRUE) +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot5
# Binned mean of the Betulaceae proportion per animal, drawn as lines.
plot6 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018,
                   color = Animal)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  # fun.y was deprecated in ggplot2 3.3.0; fun is its replacement.
  stat_summary_bin(fun = mean, geom = "line") +
  geom_vline(xintercept = 26, colour = "black") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot6
# (A stray, broken line 'geom_line(aes(color = Animal), size = 0.5,
# stat = "mean") +' with typographic quotes preceded this plot in the
# original -- a syntax error, and "mean" is not a ggplot2 stat -- removed.)
# Black-and-white version: animals distinguished by point shape only.
plot3 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018)) +
  geom_point(aes(shape = Animal), size = 2) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  scale_shape_manual(values = c(4, 2, 1)) +
  geom_vline(xintercept = 26, colour = "black") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot3
ggsave("GH.Time_for_publication_black.png", plot = plot3, width = 25, height = 16, units = c("cm"))
# Coloured points plus per-animal binned-mean lines.
plot3 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018,
                   color = Animal)) +
  geom_point(aes(color = Animal)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  # fun.y was deprecated in ggplot2 3.3.0; fun is its replacement.
  stat_summary_bin(fun = mean, geom = "line") +
  geom_vline(xintercept = 26, colour = "black") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot3
ggsave("GH.Time_for_publication_colors.png", plot = plot3, width = 25, height = 16, units = c("cm"))
# Single binned-mean line pooling all animals together.
plot8 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  # fun.y was deprecated in ggplot2 3.3.0; fun is its replacement.
  stat_summary_bin(fun = mean, geom = "line") +
  geom_vline(xintercept = 26, colour = "black") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call: the original's first labs() was entirely overridden
  # by its second one.
  labs(y = expression('Proportion of' ~ italic(Betulaceae) ~ 'DNA'),
       x = expression('Time (days after' ~ italic(Betula ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot8
ggsave("GH.Time_for_publication_oneline.png", plot = plot8, width = 25, height = 16, units = c("cm"))
# Coloured points plus one pooled binned-mean line; dotted event marker.
plot9 = ggplot(data = as.data.frame(Sper01@reads),
               aes(x = samples(Sper01)$times_from_birch/24,
                   y = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 + GHP3_00000153 + GHP1_00003018)) +
  # Breaks and limits merged into one scale: the original's xlim() replaced
  # the scale_x_continuous(breaks = ...) call, silently losing the breaks.
  scale_x_continuous(breaks = c(0, 10, 20, 30), limits = c(-2, 30)) +
  geom_point(aes(color = Animal)) +
  # fun.y was deprecated in ggplot2 3.3.0; fun is its replacement.
  stat_summary_bin(fun = mean, geom = "line") +
  geom_vline(xintercept = 26, colour = "black", linetype = "dotted") +
  theme_minimal() +
  theme(panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Single labs() call; the second labs() of the original only replaced the
  # x label, so the plain-text y label from the first call is the one kept.
  labs(y = "Proportion of birch DNA",
       x = expression('Time (days after' ~ italic(B. ~ pubescens) ~ 'was fed)'),
       fill = "Var1")
plot9
ggsave("GH.Time_for_publication_onelineanddots.png", plot = plot9, width = 25, height = 16, units = c("cm"))
# Per-family relative frequencies through time, one panel per family.
# Grey vertical lines mark the birch feeding events; the text annotations
# give the amount fed at each event.
Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus, by = c(motu_id = "id")) %>%
  group_by(sample_id, family_name) %>%
  # .groups = "drop" silences the grouping message and makes the explicit
  # ungroup() of the original unnecessary.
  summarise(relfreq = sum(relfreq), .groups = "drop") %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  ggplot(aes(x = times_from_birch/24, y = relfreq, col = family_name)) +
  geom_point() +
  xlim(-2, 30) +
  facet_wrap(. ~ family_name, ncol = 2, scales = "free_y") +
  # One vectorised geom_vline / annotate instead of nine / three separate
  # calls (same layer order as the original).
  geom_vline(xintercept = c(0.54, 1.54, 2.54, 5.54, 6.54, 7.54,
                            10.54, 11.54, 12.54),
             colour = "lightgrey") +
  annotate("text", x = c(1.60, 6.60, 11.60), y = 0.27,
           label = c("20g", "500g", "2000g")) +
  geom_vline(xintercept = 25, colour = "black", linetype = "dotted")
# As above, but restricted to families whose relative frequency exceeds
# 0.2 in at least one sample.
Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus, by = c(motu_id = "id")) %>%
  group_by(sample_id, family_name) %>%
  # .groups = "drop" silences the grouping message and makes the explicit
  # ungroup() of the original unnecessary.
  summarise(relfreq = sum(relfreq), .groups = "drop") %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  group_by(family_name) %>%
  mutate(max_relfreq = max(relfreq)) %>%
  filter(max_relfreq > 0.2) %>%
  ggplot(aes(x = times_from_birch/24, y = relfreq, col = family_name)) +
  geom_point() +
  xlim(-2, 30) +
  facet_wrap(. ~ family_name, ncol = 2, scales = "free_y") +
  # One vectorised geom_vline / annotate instead of nine / three separate
  # calls (same layer order as the original).
  geom_vline(xintercept = c(0.54, 1.54, 2.54, 5.54, 6.54, 7.54,
                            10.54, 11.54, 12.54),
             colour = "lightgrey") +
  annotate("text", x = c(1.60, 6.60, 11.60), y = 0.27,
           label = c("20g", "500g", "2000g")) +
  geom_vline(xintercept = 25, colour = "black", linetype = "dotted")
# Daily boxplots of the per-family relative frequencies for families
# exceeding 0.15 at least once; x is the day as a factor, so the numeric
# vline positions fall between the factor levels.
Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus, by = c(motu_id = "id")) %>%
  group_by(sample_id, family_name) %>%
  # .groups = "drop" silences the grouping message and makes the explicit
  # ungroup() of the original unnecessary.
  summarise(relfreq = sum(relfreq), .groups = "drop") %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  group_by(family_name) %>%
  mutate(max_relfreq = max(relfreq)) %>%
  filter(max_relfreq > 0.15) %>%
  ggplot(aes(x = factor(floor(times_from_birch/24)), y = relfreq, col = family_name)) +
  geom_boxplot() +
  facet_wrap(. ~ family_name, ncol = 2, scales = "free_y") +
  # One vectorised geom_vline / annotate instead of nine / three separate
  # calls (same layer order as the original).
  geom_vline(xintercept = c(0.54, 1.54, 2.54, 5.54, 6.54, 7.54,
                            10.54, 11.54, 12.54),
             colour = "lightgrey") +
  annotate("text", x = c(1.60, 6.60, 11.60), y = 0.27,
           label = c("20g", "500g", "2000g")) +
  geom_vline(xintercept = 25, colour = "black", linetype = "dotted")
# Per-sample read counts with time (days relative to the birch feeding) and
# animal id, plus `betula`: the summed signal of the five Betula MOTUs.
# Fix: use `<-` for top-level assignment instead of `=` (R idiom).
data_Sper01 <- cbind(as.data.frame(Sper01@reads),
                     time = samples(Sper01)$times_from_birch / 24,
                     animal_id = samples(Sper01)$animal_id) %>%
  mutate(betula = GHP1_00000007 + GHP1_00000030 + GHP1_00000681 +
           GHP3_00000153 + GHP1_00003018)
# Time window (in days) used by all the log-linear decay fits below.
start_time <- 1
end_time <- 9
# Betula signal over time, one colour per animal, log y-scale, with a linear
# fit restricted to the [start_time, end_time] decay window.
plot1 <- ggplot(data = data_Sper01,
                aes(x = time,
                    y = betula,
                    color = animal_id)) +
  geom_point() +
  geom_smooth(data = data_Sper01 %>%
                filter(time >= start_time & time <= end_time),
              method = lm, show.legend = FALSE) +
  scale_color_manual(values = cbbPalette) +
  stat_summary_bin(fun = mean, geom = "line") +
  scale_y_log10() +
  # Coloured guides mark the three feeding sessions; black line marks day 26.
  geom_vline(xintercept = c(0.54, 1.54, 2.54), colour = cbbPalette[6]) +
  geom_vline(xintercept = c(5.54, 6.54, 7.54), colour = cbbPalette[7]) +
  geom_vline(xintercept = c(10.54, 11.54, 12.54), colour = cbbPalette[8]) +
  geom_vline(xintercept = 26, colour = "black") +
  scale_x_continuous(breaks = scales::pretty_breaks(n = 15), limits = c(-2, 30)) +
  guides(color = guide_legend(title = "Individual")) +
  theme_minimal() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Fix: the original called labs() twice and the first x label was dead code,
  # immediately overridden by the expression version. One labs() call suffices.
  labs(y = "Proportion of Betula DNA",
       x = expression('Time (days after'~italic(Betula)~ ~italic( pubescens)~ 'was fed)'),
       fill = "Var1")
plot1
## Warning: Transformation introduced infinite values in continuous y-axis
## Warning: Transformation introduced infinite values in continuous y-axis
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 21 rows containing non-finite values (stat_summary_bin).
## Warning: Removed 15 rows containing missing values (geom_point).
# Decay of the raw Betula signal: inverse log-linear regression
# (time ~ log(frequency)) fitted per animal, plus a pooled model with an
# additive animal effect. Only the decay window with a non-zero signal is used.
# NOTE(review): the filters use ids "X"/"Y"/"Z" while the summary table labels
# them "9/10"/"10/10"/"12/10" — confirm the animal_id coding in data_Sper01.
lm_9_10 <- data_Sper01 %>%
  filter(time >= start_time,
         time <= end_time,
         betula > 0,
         animal_id == "X") %>%
  lm(time ~ log(betula), data = .)
lm_10_10 <- data_Sper01 %>%
  filter(time >= start_time,
         time <= end_time,
         betula > 0,
         animal_id == "Y") %>%
  lm(time ~ log(betula), data = .)
lm_12_10 <- data_Sper01 %>%
  filter(time >= start_time,
         time <= end_time,
         betula > 0,
         animal_id == "Z") %>%
  lm(time ~ log(betula), data = .)
lm_all <- data_Sper01 %>%
  filter(time >= start_time,
         time <= end_time,
         betula > 0) %>%
  lm(time ~ log(betula) + animal_id, data = .)
# Half-life in hours = -slope * log(2) * 24, with a 95% normal-approximation CI
# built from the slope's standard error.
half_life <- tibble(
  animal_id = c("9/10", "10/10", "12/10", "all"),
  betula_sper01 = -c(coef(lm_9_10)[2],
                     coef(lm_10_10)[2],
                     coef(lm_12_10)[2],
                     coef(lm_all)[2]) * log(2) * 24,
  ci_betula_sper01 = qnorm(p = 0.975,
                           mean = 0,
                           sd = c(summary(lm_9_10)$coefficients[2, "Std. Error"],
                                  summary(lm_10_10)$coefficients[2, "Std. Error"],
                                  summary(lm_12_10)$coefficients[2, "Std. Error"],
                                  summary(lm_all)$coefficients[2, "Std. Error"])) *
    log(2) * 24,
  ci_betula_sper01_inf = betula_sper01 - ci_betula_sper01,
  ci_betula_sper01_sup = betula_sper01 + ci_betula_sper01)
half_life
# Mean relative read frequency per family over the monitored period
# (time_group < 26 days), sorted from most to least abundant.
family_means <- Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus %>% select(-starts_with("obiclean_status")),
            by = c(motu_id = "id")) %>%
  ungroup() %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  mutate(times_from_birch = times_from_birch / 24,
         time_group = floor(times_from_birch)) %>%
  filter(time_group < 26) %>%
  group_by(family_name) %>%
  summarise(mean_relfreq = mean(relfreq)) %>%
  arrange(desc(mean_relfreq))
family_means
# Per-family, per-animal, per-day correction factors: the ratio of the overall
# mean frequency (per family x animal) to the daily mean. These are later used
# to normalise the Betulaceae signal against a reference family.
corrections <- Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus %>% select(-starts_with("obiclean_status")),
            by = c(motu_id = "id")) %>%
  ungroup() %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  mutate(times_from_birch = times_from_birch / 24,
         time_group = floor(times_from_birch)) %>%
  filter(time_group < 26) %>%
  group_by(family_name, animal_id) %>%
  mutate(mean_relfreq = mean(relfreq)) %>%
  arrange(desc(mean_relfreq)) %>%
  group_by(family_name, animal_id, mean_relfreq, time_group) %>%
  summarise(mean_group = mean(relfreq)) %>%
  mutate(correction = mean_relfreq / mean_group) %>%
  ungroup()
## `summarise()` has grouped output by 'family_name', 'animal_id', 'mean_relfreq'. You can override using the `.groups` argument.
corrections
# Betulaceae relative frequencies rescaled by the Juncaceae-based correction
# factor (the reference family is presumably assumed to be ingested at a
# roughly constant rate — TODO confirm with the experimental design).
corrrected_data_Sper01 <- Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus %>% select(-starts_with("obiclean_status")),
            by = c(motu_id = "id")) %>%
  ungroup() %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  mutate(times_from_birch = times_from_birch / 24,
         time_group = floor(times_from_birch)) %>%
  filter(time_group < 26) %>%
  # Implicit join; the keys resolve to c("animal_id", "time_group").
  left_join(corrections %>%
              filter(family_name == "Juncaceae") %>%
              select(animal_id, time_group, correction)) %>%
  mutate(cor_relfreq = relfreq * correction) %>%
  filter(family_name == "Betulaceae") %>%
  # NOTE(review): grouping uses "Animal_id" (capitalised) while the join key
  # above is lower-case "animal_id"; both columns apparently coexist in the
  # samples table — verify this is intentional.
  group_by(sample_id, Animal_id, times_from_birch, time_group) %>%
  summarise(relfreq = sum(cor_relfreq))
## Joining, by = c("animal_id", "time_group")
## `summarise()` has grouped output by 'sample_id', 'Animal_id', 'times_from_birch'. You can override using the `.groups` argument.
# Juncaceae-corrected Betula signal over time, log y-scale, with a linear fit
# restricted to the [start_time, end_time] decay window.
ggplot(data = corrrected_data_Sper01,
       aes(x = times_from_birch,
           y = relfreq,
           color = Animal_id)) +
  geom_point() +
  geom_smooth(data = corrrected_data_Sper01 %>%
                filter(times_from_birch >= start_time &
                         times_from_birch <= end_time),
              method = lm, show.legend = FALSE) +
  scale_color_manual(values = cbbPalette) +
  stat_summary_bin(fun = mean, geom = "line") +
  scale_y_log10() +
  # Coloured guides mark the three feeding sessions; black line marks day 26.
  geom_vline(xintercept = c(0.54, 1.54, 2.54), colour = cbbPalette[6]) +
  geom_vline(xintercept = c(5.54, 6.54, 7.54), colour = cbbPalette[7]) +
  geom_vline(xintercept = c(10.54, 11.54, 12.54), colour = cbbPalette[8]) +
  geom_vline(xintercept = 26, colour = "black") +
  scale_x_continuous(breaks = scales::pretty_breaks(n = 15), limits = c(-2, 30)) +
  guides(color = guide_legend(title = "Individual")) +
  theme_minimal() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Fix: the original called labs() twice and the first x label was dead code,
  # immediately overridden by the expression version. One labs() call suffices.
  labs(y = "Proportion of Betula DNA",
       x = expression('Time (days after'~italic(Betula)~ ~italic( pubescens)~ 'was fed)'),
       fill = "Var1")
## Warning: Transformation introduced infinite values in continuous y-axis
## Warning: Transformation introduced infinite values in continuous y-axis
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 6 rows containing non-finite values (stat_summary_bin).
## Warning: Removed 1 rows containing missing values (geom_point).
# Half-life of the Juncaceae-corrected Betula signal: inverse log-linear
# regression (time ~ log(frequency)) per animal, plus a pooled model with an
# additive animal effect, over the decay window with a non-zero signal.
lm_9_10 <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0,
         Animal_id == "9/10") %>%
  lm(times_from_birch ~ log(relfreq), data = .)
lm_10_10 <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0,
         Animal_id == "10/10") %>%
  lm(times_from_birch ~ log(relfreq), data = .)
lm_12_10 <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0,
         Animal_id == "12/10") %>%
  lm(times_from_birch ~ log(relfreq), data = .)
lm_all <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0) %>%
  lm(times_from_birch ~ log(relfreq) + Animal_id, data = .)
# Half-life in hours = -slope * log(2) * 24, with a 95% normal-approximation CI
# built from the slope's standard error.
half_life <- tibble(
  animal_id = c("9/10", "10/10", "12/10", "all"),
  betula_sper01 = -c(coef(lm_9_10)[2],
                     coef(lm_10_10)[2],
                     coef(lm_12_10)[2],
                     coef(lm_all)[2]) * log(2) * 24,
  ci_betula_sper01 = qnorm(p = 0.975,
                           mean = 0,
                           sd = c(summary(lm_9_10)$coefficients[2, "Std. Error"],
                                  summary(lm_10_10)$coefficients[2, "Std. Error"],
                                  summary(lm_12_10)$coefficients[2, "Std. Error"],
                                  summary(lm_all)$coefficients[2, "Std. Error"])) *
    log(2) * 24,
  ci_betula_sper01_inf = betula_sper01 - ci_betula_sper01,
  ci_betula_sper01_sup = betula_sper01 + ci_betula_sper01)
half_life
cumpellet <- function(init, data, demi, hours = 24) {
  # Running total of pellet mass with exponential decay: at every time step the
  # accumulated mass decays with half-life `demi` over `hours` hours, then the
  # newly produced mass `data[i]` is added.
  #
  # init  - mass present before the first observation (single number).
  # data  - numeric vector of pellet masses, one per step, chronological order.
  # demi  - half-life, in hours.
  # hours - duration of one time step in hours (default 24 preserves the
  #         original daily-step behaviour).
  #
  # Returns a numeric vector of the same length as `data`.
  stopifnot(is.numeric(init), length(init) == 1,
            is.numeric(demi), demi > 0)
  decay <- 2^(-hours / demi)  # fraction remaining after one step
  out <- numeric(length(data))
  current <- init
  for (i in seq_along(data)) {
    current <- current * decay + data[i]
    out[i] <- current
  }
  out
}
# Pellet weights: raw file has one column per animal; reshape to long format
# and accumulate mass per animal with a 24 h half-life decay (cumpellet).
pellet <- read_delim("Data/pellet_weigth.txt", delim = "\t", trim_ws = TRUE) %>%
  pivot_longer(-Date, names_to = "Animal_id", values_to = "pellet") %>%
  # Dates appear to be day/month/year; reversing the fields yields a
  # lexicographically sortable year/month/day key — TODO confirm the format.
  mutate(sortable_date = vapply(str_split(Date, "/"),
                                FUN = function(x) paste(rev(x), collapse = "/"),
                                FUN.VALUE = character(1))) %>%
  group_by(Animal_id) %>%
  arrange(sortable_date) %>%
  mutate(cumul_pellet = cumpellet(2000, pellet, 24)) %>%
  ungroup()
##
## ── Column specification ────────────────────────────────────────────────────────
## cols(
## Date = col_character(),
## `9/10` = col_double(),
## `10/10` = col_double(),
## `12/10` = col_double()
## )
# Attach the pellet table to the sample metadata and add day-resolution time
# columns. NOTE(review): this converts times_from_birch from hours to days in
# place inside the Sper01 object — running this chunk twice would divide twice.
Sper01@samples <- Sper01@samples %>%
  left_join(pellet) %>%
  mutate(times_from_birch = times_from_birch / 24,
         time_group = floor(times_from_birch))
## Joining, by = c("Date", "Animal_id")
# Correction factors anchored to the measured pellet mass: for each family,
# animal and day, the daily pellet weight divided by the daily mean frequency.
corrections <- Sper01@reads %>%
  as_tibble() %>%
  mutate(sample_id = rownames(Sper01)) %>%
  pivot_longer(cols = -"sample_id", names_to = "motu_id", values_to = "relfreq") %>%
  left_join(Sper01@motus %>% select(-starts_with("obiclean_status")),
            by = c(motu_id = "id")) %>%
  ungroup() %>%
  left_join(Sper01@samples, by = "sample_id") %>%
  filter(time_group < 26) %>%
  group_by(family_name, animal_id, time_group, pellet, cumul_pellet) %>%
  summarise(mean_group = mean(relfreq)) %>%
  mutate(correction = pellet / mean_group) %>%
  ungroup()
## `summarise()` has grouped output by 'family_name', 'animal_id', 'time_group', 'pellet'. You can override using the `.groups` argument.
corrections
# Betulaceae relative frequencies rescaled with the pellet-weight-based
# correction factors computed above.
# NOTE(review): the reference family here is "Brassicaceae" whereas the earlier
# correction used "Juncaceae" — confirm this difference is intended.
Sper01@reads %>%
as_tibble() %>%
# Sample ids come from the row names of the Sper01 read matrix.
mutate(sample_id = rownames(Sper01)) %>%
pivot_longer(cols = - "sample_id", names_to = "motu_id",values_to = "relfreq") %>%
left_join(Sper01@motus %>% select(- starts_with("obiclean_status") ),
by = c(motu_id = "id")) %>%
ungroup() %>%
left_join(Sper01@samples, by = "sample_id") %>%
# Restrict to the monitored period (time_group is in whole days).
filter(time_group < 26) %>%
# Implicit join; the keys resolve to c("animal_id", "time_group").
left_join(corrections %>%
filter(family_name == "Brassicaceae") %>%
select(animal_id, time_group,correction)) %>%
mutate(cor_relfreq = relfreq * correction) %>%
filter(family_name=="Betulaceae") %>%
# NOTE(review): grouping uses "Animal_id" (capitalised) while the join key
# above is lower-case "animal_id"; both columns apparently coexist — verify.
group_by(sample_id,Animal_id,times_from_birch,time_group) %>%
summarise(relfreq=sum(cor_relfreq)) -> corrrected_data_Sper01
## Joining, by = c("animal_id", "time_group")
## `summarise()` has grouped output by 'sample_id', 'Animal_id', 'times_from_birch'. You can override using the `.groups` argument.
# Pellet-corrected Betula signal over time, log y-scale, with a linear fit
# restricted to the [start_time, end_time] decay window.
ggplot(data = corrrected_data_Sper01,
       aes(x = times_from_birch,
           y = relfreq,
           color = Animal_id)) +
  geom_point() +
  geom_smooth(data = corrrected_data_Sper01 %>%
                filter(times_from_birch >= start_time &
                         times_from_birch <= end_time),
              method = lm, show.legend = FALSE) +
  scale_color_manual(values = cbbPalette) +
  stat_summary_bin(fun = mean, geom = "line") +
  scale_y_log10() +
  # Coloured guides mark the three feeding sessions; black line marks day 26.
  geom_vline(xintercept = c(0.54, 1.54, 2.54), colour = cbbPalette[6]) +
  geom_vline(xintercept = c(5.54, 6.54, 7.54), colour = cbbPalette[7]) +
  geom_vline(xintercept = c(10.54, 11.54, 12.54), colour = cbbPalette[8]) +
  geom_vline(xintercept = 26, colour = "black") +
  scale_x_continuous(breaks = scales::pretty_breaks(n = 15), limits = c(-2, 30)) +
  guides(color = guide_legend(title = "Individual")) +
  theme_minimal() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black")) +
  # Fix: the original called labs() twice and the first x label was dead code,
  # immediately overridden by the expression version. One labs() call suffices.
  labs(y = "Proportion of Betula DNA",
       x = expression('Time (days after'~italic(Betula)~ ~italic( pubescens)~ 'was fed)'),
       fill = "Var1")
## Warning: Transformation introduced infinite values in continuous y-axis
## Warning: Transformation introduced infinite values in continuous y-axis
## `geom_smooth()` using formula 'y ~ x'
## Warning: Removed 10 rows containing non-finite values (stat_summary_bin).
## Warning: Removed 6 rows containing missing values (geom_point).
# Half-life of the pellet-corrected Betula signal: inverse log-linear
# regression (time ~ log(frequency)) per animal, plus a pooled model with an
# additive animal effect, over the decay window with a non-zero signal.
lm_9_10 <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0,
         Animal_id == "9/10") %>%
  lm(times_from_birch ~ log(relfreq), data = .)
lm_10_10 <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0,
         Animal_id == "10/10") %>%
  lm(times_from_birch ~ log(relfreq), data = .)
lm_12_10 <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0,
         Animal_id == "12/10") %>%
  lm(times_from_birch ~ log(relfreq), data = .)
lm_all <- corrrected_data_Sper01 %>%
  filter(times_from_birch >= start_time,
         times_from_birch <= end_time,
         relfreq > 0) %>%
  lm(times_from_birch ~ log(relfreq) + Animal_id, data = .)
# Half-life in hours = -slope * log(2) * 24, with a 95% normal-approximation CI
# built from the slope's standard error.
half_life <- tibble(
  animal_id = c("9/10", "10/10", "12/10", "all"),
  betula_sper01 = -c(coef(lm_9_10)[2],
                     coef(lm_10_10)[2],
                     coef(lm_12_10)[2],
                     coef(lm_all)[2]) * log(2) * 24,
  ci_betula_sper01 = qnorm(p = 0.975,
                           mean = 0,
                           sd = c(summary(lm_9_10)$coefficients[2, "Std. Error"],
                                  summary(lm_10_10)$coefficients[2, "Std. Error"],
                                  summary(lm_12_10)$coefficients[2, "Std. Error"],
                                  summary(lm_all)$coefficients[2, "Std. Error"])) *
    log(2) * 24,
  ci_betula_sper01_inf = betula_sper01 - ci_betula_sper01,
  ci_betula_sper01_sup = betula_sper01 + ci_betula_sper01)
half_life